# The goals / steps of this project are the following:
# Import Packages
import glob # OS dependencies to get file system details
import cv2
#importing some useful packages
import pickle
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
from ipywidgets import interact, interactive, fixed
%matplotlib inline
def show_images(images, gray=None, divider=2):
    """
    Display a list of images in a subplot grid.

    :param images: list of images to display
    :param gray: default matplotlib colour map; automatically overridden
                 with "gray" when an image is 2-D (i.e. a binary image)
    :param divider: number of subplot columns
    :return: None
    """
    # Ceiling division so every image gets a subplot slot.
    # BUGFIX: the original `(len(images)+1)//divider` under-allocated rows
    # whenever len(images) % divider was neither 0 nor divider-1
    # (e.g. 5 images with divider=4 gave 1 row and a subplot index error).
    rows = (len(images) + divider - 1) // divider
    plt.figure(figsize=(16, 16))
    for idx, img in enumerate(images):
        plt.subplot(rows, divider, idx + 1)
        # Binary (2-D) images are forced to a grayscale colour map;
        # otherwise fall back to the caller-supplied cmap.
        plt.imshow(img, cmap="gray" if len(img.shape) == 2 else gray)
        plt.xticks([])
        plt.yticks([])
    plt.show()
# Load every chessboard calibration image from the camera_cal directory
camera_cal_imgs = []
for path in glob.glob("camera_cal/*"):
    camera_cal_imgs.append(mpimg.imread(path))
# Preview the first four calibration images on a single row of four
show_images(camera_cal_imgs[:4], divider=4)
def grayscale(img, opencv_read=False):
    """
    Convert a colour image to a single-channel grayscale image.

    :param img: input colour image
    :param opencv_read: True when the image was loaded with cv2.imread
                        (BGR channel order); False for RGB-ordered images
    :return: grayscale image
    """
    # Pick the conversion code that matches the source channel order
    code = cv2.COLOR_BGR2GRAY if opencv_read else cv2.COLOR_RGB2GRAY
    return cv2.cvtColor(img, code)
# Prepare the (x, y, z) object points for a 9x6 chessboard.
# z stays 0 because the calibration target is planar.
objp = np.zeros((6*9, 3), np.float32)
grid_x, grid_y = np.meshgrid(np.arange(9), np.arange(6))
objp[:, 0] = grid_x.ravel()
objp[:, 1] = grid_y.ravel()
# Accumulators for 3-D object points and 2-D image points over all images
objpoints = []
imgpoints = []
def find_and_draw_chessboard(img, idx, axs, pattern_size=(9, 6)):
    """
    Detect chessboard corners, record the point correspondences in the
    module-level objpoints/imgpoints lists, and show the annotated image
    on the given subplot axis.

    :param img: calibration image (annotated in place when corners found)
    :param idx: index of the subplot axis to draw on
    :param axs: flat array of subplot axes
    :param pattern_size: inner-corner grid of the chessboard
    """
    gray_img = grayscale(img)
    # Locate the inner-corner grid of the chessboard
    found, corners = cv2.findChessboardCorners(gray_img, pattern_size, None)
    if found:
        # Accumulate the 3-D / 2-D correspondences for calibration
        objpoints.append(objp)
        imgpoints.append(corners)
        # Overlay the detected corners on the image (in place)
        cv2.drawChessboardCorners(img, pattern_size, corners, found)
    axs[idx].axis('off')
    axs[idx].imshow(img)
# Draw subplots dynamically: a 5x4 grid, one cell per calibration image,
# running corner detection (and point accumulation) on each.
fig, axs = plt.subplots(5,4, figsize=(16, 16))
axs = axs.ravel()
for idx, img in enumerate(camera_cal_imgs):
    find_and_draw_chessboard(img,idx,axs)
# Cache a sample image for reuse further down the notebook
sample_img = mpimg.imread("camera_cal/calibration1.jpg")
# BUGFIX: cv2.calibrateCamera expects imageSize as (width, height),
# whereas numpy's shape is (height, width) — reverse the first two dims.
img_size = sample_img.shape[1::-1]
# Do Camera calibration given objects' points and images' points
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size, None, None)
# Save the Camera calibration results for later use.
# Use a context manager so the file handle is closed deterministically
# (the original open(...) was never closed).
dist_pickle = {"mtx": mtx, "dist": dist}
with open("resources/calibration.p", "wb") as calib_file:
    pickle.dump(dist_pickle, calib_file)
def undistort(img, mtx, dist):
    """
    Remove lens distortion from an image.

    :param img: distorted input image
    :param mtx: camera matrix from calibration
    :param dist: distortion coefficients from calibration
    :return: undistorted image
    """
    corrected = cv2.undistort(img, mtx, dist, None, mtx)
    return corrected
def undistort_image(img, cmatrix, distc ):
    """
    Undistort an image and plot the original beside the result.

    :param img: input image
    :param cmatrix: camera matrix
    :param distc: distortion coefficients
    :return: the undistorted image
    """
    corrected = undistort(img, cmatrix, distc)
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 16))
    ax1.imshow(img)
    ax1.set_title('Original Image', fontsize=18)
    ax2.imshow(corrected)
    ax2.set_title('Undistorted Image', fontsize=18)
    # Hand the corrected image back for reuse downstream
    return corrected
# Undistort the sample calibration image, then every road test image
res = undistort_image(sample_img, mtx, dist)
test_images = [mpimg.imread(path) for path in glob.glob("test_images/*")]
undistort_images = [undistort_image(img, mtx, dist) for img in test_images]
def corners_unwarp(img, src, dst):
    """
    Apply a perspective ("bird's-eye") transform to an image.

    :param img: input image
    :param src: four source points in the input image
    :param dst: four destination points in the output image
    :return: (warped image, inverse transform matrix, forward transform matrix)
    """
    # Forward transform maps src -> dst; the inverse maps back again
    M = cv2.getPerspectiveTransform(src, dst)
    Minv = cv2.getPerspectiveTransform(dst, src)
    h, w = img.shape[:2]
    # warpPerspective takes the output size as (width, height)
    warped = cv2.warpPerspective(img, M, (w, h), flags=cv2.INTER_LINEAR)
    return warped, Minv, M
height, width = test_images[0].shape[:2]
# source points: a trapezoid around the lane in the camera view
p1 = (575, 465)   # top-left
p2 = (705, 465)   # top-right
p3 = (255, 685)   # bottom-left
p4 = (1050, 685)  # bottom-right
line_color = (0, 255, 0) # Green
# destination points: the rectangle the trapezoid maps onto in the
# bird's-eye view (450 px in from each side, full image height)
pd1 = (450, 0)
pd2 = (width - 450, 0)
pd3 = (450, height)
pd4 = (width - 450, height)
def draw_polygon_on_image(img, line_color=(0, 255,0)):
    """
    Draw the source-region quadrilateral onto an image (modified in place).

    :param img: image to draw on
    :param line_color: colour of the polygon edges
    :return: the same image with the polygon drawn
    """
    # Trace the quadrilateral edge by edge using the module-level points
    for start, end in ((p1, p2), (p2, p4), (p4, p3), (p3, p1)):
        cv2.line(img, start, end, line_color, 3)
    return img
# Overlay the source region on each test image and display the results
src_selected_images = [draw_polygon_on_image(img) for img in test_images]
show_images(src_selected_images)
def visualize_warped_images(img, src, dst):
    """
    Warp an image to a top-down view and plot it beside the original
    (with the source polygon overlaid).

    :param img: undistorted input image (polygon is drawn on it in place)
    :param src: perspective-transform source points
    :param dst: perspective-transform destination points
    :return: the warped (top-down) image
    """
    top_down, _, _ = corners_unwarp(img, src, dst)
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 16))
    annotated = draw_polygon_on_image(img)
    ax1.imshow(annotated)
    ax1.set_title('Undistorted Image', fontsize=18)
    ax2.imshow(top_down)
    ax2.set_title('Unwarped Image', fontsize=18)
    return top_down
# Source and destination point arrays for the perspective transform
src = np.float32([p1, p2, p3, p4])
dst = np.float32([pd1, pd2, pd3, pd4])
# Warp every undistorted test image to a bird's-eye view and keep them
warped_images = [visualize_warped_images(img, src, dst) for img in undistort_images]
def extract_rgb_color_spaces(uwimg):
    """Split an RGB image into its R, G and B channel planes."""
    # Channel order follows mpimg/matplotlib loading: R=0, G=1, B=2
    return uwimg[:, :, 0], uwimg[:, :, 1], uwimg[:, :, 2]
def extract_hsv_color_spaces(uwimg):
    """Convert an RGB image to HSV and return the H, S, V channel planes."""
    hsv = cv2.cvtColor(uwimg, cv2.COLOR_RGB2HSV)
    return hsv[:, :, 0], hsv[:, :, 1], hsv[:, :, 2]
def extract_hsl_color_spaces(uwimg):
    """Convert an RGB image with cv2.COLOR_RGB2HLS and return its three channel planes."""
    # NOTE(review): OpenCV's HLS channel order is H, L, S — so the variable
    # named *_S below actually holds the Lightness plane and *_L holds the
    # Saturation plane. Callers unpack positionally, so behaviour is
    # unaffected, but the names are misleading — confirm intent before
    # renaming or reusing these channels elsewhere.
    unwarp_HSL = cv2.cvtColor(uwimg, cv2.COLOR_RGB2HLS)
    unwarp_HSL_H = unwarp_HSL[:, :, 0]
    unwarp_HSL_S = unwarp_HSL[:, :, 1]
    unwarp_HSL_L = unwarp_HSL[:, :, 2]
    return unwarp_HSL_H,unwarp_HSL_S,unwarp_HSL_L
def extract_lab_color_spaces(uwimg):
    """Convert an RGB image to CIELAB and return the L, A, B channel planes."""
    lab = cv2.cvtColor(uwimg, cv2.COLOR_RGB2Lab)
    return lab[:, :, 0], lab[:, :, 1], lab[:, :, 2]
def apply_rgb_filter(unwarp_img):
    """Plot the R, G and B channels of an image side by side in grayscale."""
    channels = extract_rgb_color_spaces(unwarp_img)
    titles = ('RGB R-channel', 'RGB G-Channel', 'RGB B-channel')
    fig, axs = plt.subplots(1, 3, figsize=(16, 16))
    axs = axs.ravel()
    for ax, channel, title in zip(axs, channels, titles):
        ax.imshow(channel, cmap='gray')
        ax.set_title(title, fontsize=12)
def apply_hsv_filter(unwarp_img):
    """Plot the H, S and V channels of an image side by side in grayscale."""
    channels = extract_hsv_color_spaces(unwarp_img)
    titles = ('HSV H-Channel', 'HSV S-channel', 'HSV V-Channel')
    fig, axs = plt.subplots(1, 3, figsize=(16, 16))
    axs = axs.ravel()
    for ax, channel, title in zip(axs, channels, titles):
        ax.imshow(channel, cmap='gray')
        ax.set_title(title, fontsize=12)
def apply_hsl_filter(unwarp_img):
    """
    Plot the three channels returned by extract_hsl_color_spaces side by
    side in grayscale for visual comparison.
    """
    unwarp_HSL_H, unwarp_HSL_S, unwarp_HSL_L = extract_hsl_color_spaces(unwarp_img)
    fig, axs = plt.subplots(1, 3, figsize=(16, 16))
    axs = axs.ravel()
    axs[0].imshow(unwarp_HSL_H, cmap='gray')
    axs[0].set_title('HSL H-Channel', fontsize=12)
    axs[1].imshow(unwarp_HSL_S, cmap='gray')
    axs[1].set_title('HSL S-channel', fontsize=12)
    axs[2].imshow(unwarp_HSL_L, cmap='gray')
    # BUGFIX: the original title said 'HSL V-Channel' — HSL has no V
    # channel; this subplot shows the L-named variable.
    axs[2].set_title('HSL L-Channel', fontsize=12)
def apply_lab_filter(unwarp_img):
    """Plot the L, A and B CIELAB channels of an image side by side."""
    channels = extract_lab_color_spaces(unwarp_img)
    titles = ('LAB L-Channel', 'LAB A-channel', 'LAB B-Channel')
    fig, axs = plt.subplots(1, 3, figsize=(16, 16))
    axs = axs.ravel()
    for ax, channel, title in zip(axs, channels, titles):
        ax.imshow(channel, cmap='gray')
        ax.set_title(title, fontsize=12)
def apply_color_filter(unwarp_img):
    """
    Run the LAB channel visualization on a copy of the image.

    The RGB / HSV / HSL variants can be swapped in here for exploration;
    only the LAB visualization is active.
    """
    work_copy = np.copy(unwarp_img)
    apply_lab_filter(work_copy)
# Inspect the colour channels of the first two warped test images
for sample_img_test in warped_images[:2]:
    apply_color_filter(sample_img_test)
def abs_sobel_thresh(gray, orient='x', thresh_min=0, thresh_max=255):
    """
    Threshold the absolute Sobel derivative of a single-channel image.

    :param gray: grayscale (single-channel) input image
    :param orient: 'x' or 'y' derivative direction
    :param thresh_min: inclusive lower bound on the scaled gradient
    :param thresh_max: inclusive upper bound on the scaled gradient
    :return: binary mask, 1 where the scaled gradient lies in the bounds
    """
    # Derivative order per axis (both stay 0 for any other orient value,
    # matching the original behaviour)
    dx = 1 if orient == 'x' else 0
    dy = 1 if orient == 'y' else 0
    abs_grad = np.absolute(cv2.Sobel(gray, cv2.CV_64F, dx, dy))
    # Rescale so the maximum gradient maps to 255 (8-bit range),
    # making the thresholds image-independent
    scaled = np.uint8(255 * abs_grad / np.max(abs_grad))
    mask = np.zeros_like(scaled)
    mask[(scaled >= thresh_min) & (scaled <= thresh_max)] = 1
    return mask
def apply_sobel_threshold(img, gray, min_thresh, max_thresh):
    """
    Show an image next to its thresholded x-direction Sobel gradient.

    :param img: colour image for the left panel
    :param gray: single-channel image the gradient is computed on
    :param min_thresh: lower gradient threshold
    :param max_thresh: upper gradient threshold
    """
    sobel_binary = abs_sobel_thresh(gray, 'x', min_thresh, max_thresh)
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 16))
    ax1.imshow(img)
    ax1.set_title('Unwarped Image', fontsize=18)
    ax2.imshow(sobel_binary, cmap='gray')
    ax2.set_title('Sobel Absolute', fontsize=18)
    plt.show()
# Sobel threshold bounds shared by the comparisons below
min_thresh=50
max_thresh=190
# Compare the x-gradient response on several single-channel inputs:
# plain grayscale, RGB channel 0, HSV channel 2, the HLS channel-1 plane
# and LAB channel 0 — all on the same warped test image.
sample_img = warped_images[1]
gray = grayscale(sample_img)
apply_sobel_threshold(sample_img,gray,min_thresh, max_thresh)
sample_img = warped_images[1]
gray_rgb,_,_ = extract_rgb_color_spaces(sample_img)
apply_sobel_threshold(sample_img,gray_rgb,min_thresh, max_thresh)
sample_img = warped_images[1]
_,_,gray_hsv = extract_hsv_color_spaces(sample_img)
apply_sobel_threshold(sample_img,gray_hsv,min_thresh, max_thresh)
sample_img = warped_images[1]
_,gray_hsl,_ = extract_hsl_color_spaces(sample_img)
apply_sobel_threshold(sample_img,gray_hsl,min_thresh, max_thresh)
sample_img = warped_images[1]
gray_lab,_,_ = extract_lab_color_spaces(sample_img)
apply_sobel_threshold(sample_img,gray_lab,min_thresh, max_thresh)
def mag_threshold(gray, sobel_kernel=3, mag_thresh=(0, 255)):
    """
    Threshold the Sobel gradient magnitude of a single-channel image.

    :param gray: grayscale (single-channel) input image
    :param sobel_kernel: Sobel aperture size
    :param mag_thresh: (low, high) inclusive bounds on the scaled magnitude
    :return: binary mask, 1 where the magnitude lies inside the bounds
    """
    grad_x = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    grad_y = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # Euclidean magnitude of the two directional gradients
    magnitude = np.sqrt(grad_x**2 + grad_y**2)
    # Rescale so the maximum maps to 255 (8-bit range)
    magnitude = (magnitude / (np.max(magnitude) / 255)).astype(np.uint8)
    low, high = mag_thresh
    mask = np.zeros_like(magnitude)
    mask[(magnitude >= low) & (magnitude <= high)] = 1
    return mask
def apply_sobel_mag_gradient(uwimg,gray,sobel_kernel, min_thresh, max_thresh):
    """Show an image next to its Sobel gradient-magnitude mask."""
    magnitude_mask = mag_threshold(gray, sobel_kernel, (min_thresh, max_thresh))
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 16))
    ax1.imshow(uwimg)
    ax1.set_title('Unwarped Image', fontsize=18)
    ax2.imshow(magnitude_mask, cmap='gray')
    ax2.set_title('Sobel Magnitude', fontsize=18)
# Magnitude-threshold parameters for the comparisons below
min_thresh=50
max_thresh=190
sobel_kernel = 15
# Compare gradient-magnitude response across the same single-channel
# inputs used for the absolute-Sobel comparison above.
sample_img = warped_images[1]
gray = grayscale(sample_img)
apply_sobel_mag_gradient(sample_img, gray, sobel_kernel, min_thresh, max_thresh)
sample_img = warped_images[1]
gray_rgb,_,_ = extract_rgb_color_spaces(sample_img)
apply_sobel_mag_gradient(sample_img, gray_rgb, sobel_kernel, min_thresh, max_thresh)
sample_img = warped_images[1]
_,_,gray_hsv = extract_hsv_color_spaces(sample_img)
apply_sobel_mag_gradient(sample_img, gray_hsv, sobel_kernel, min_thresh, max_thresh)
sample_img = warped_images[1]
_,gray_hsl,_ = extract_hsl_color_spaces(sample_img)
apply_sobel_mag_gradient(sample_img, gray_hsl, sobel_kernel, min_thresh, max_thresh)
sample_img = warped_images[1]
gray_lab,_,_ = extract_lab_color_spaces(sample_img)
apply_sobel_mag_gradient(sample_img, gray_lab, sobel_kernel, min_thresh, max_thresh)
# Define a function to threshold an image for a given range and Sobel kernel
def dir_threshold(gray, sobel_kernel=3, thresh=(0, np.pi/2)):
    """Threshold a single-channel image by gradient direction (radians)."""
    # Calculate the x and y gradients
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # Gradient direction from the absolute components, range [0, pi/2]
    absgraddir = np.arctan2(np.absolute(sobely), np.absolute(sobelx))
    # NOTE(review): this mask is INVERTED relative to the usual pattern —
    # it starts at 1 everywhere and zeroes pixels whose direction falls
    # INSIDE the threshold range, so the returned 1s mark directions
    # OUTSIDE [thresh[0], thresh[1]]. combine_thresholds consumes it with
    # `dir_binary == 1`; confirm the inversion is intentional before
    # "fixing" it.
    binary_output = np.ones_like(absgraddir)
    binary_output[(absgraddir >= thresh[0]) & (absgraddir <= thresh[1])] = 0
    # Return the binary image
    return binary_output
def apply_sobel_dir_gradient(uwimg,gray,sobel_kernel, min_thresh, max_thresh):
    """Show an image next to its gradient-direction threshold mask."""
    direction_mask = dir_threshold(gray, sobel_kernel, (min_thresh, max_thresh))
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 16))
    ax1.imshow(uwimg)
    ax1.set_title('Unwarped Image', fontsize=18)
    ax2.imshow(direction_mask, cmap='gray')
    ax2.set_title('Sobel Direction', fontsize=18)
# Direction-threshold parameters (radians) for the comparisons below
dir_min_thresh=0.3
dir_max_thresh=1.51
dir_sobel_kernel = 15
# Compare gradient-direction response across the same single-channel
# inputs used in the previous Sobel experiments.
sample_img = warped_images[1]
gray = grayscale(sample_img)
apply_sobel_dir_gradient(sample_img, gray, dir_sobel_kernel, dir_min_thresh, dir_max_thresh)
sample_img = warped_images[1]
gray_rgb,_,_ = extract_rgb_color_spaces(sample_img)
apply_sobel_dir_gradient(sample_img, gray_rgb, dir_sobel_kernel, dir_min_thresh, dir_max_thresh)
sample_img = warped_images[1]
_,_,gray_hsv = extract_hsv_color_spaces(sample_img)
apply_sobel_dir_gradient(sample_img, gray_hsv, dir_sobel_kernel, dir_min_thresh, dir_max_thresh)
sample_img = warped_images[1]
_,gray_hsl,_ = extract_hsl_color_spaces(sample_img)
apply_sobel_dir_gradient(sample_img, gray_hsl, dir_sobel_kernel, dir_min_thresh, dir_max_thresh)
sample_img = warped_images[1]
gray_lab,_,_ = extract_lab_color_spaces(sample_img)
apply_sobel_dir_gradient(sample_img, gray_lab, dir_sobel_kernel, dir_min_thresh, dir_max_thresh)
def combine_thresholds(unwarp_img, gray, mag_kernel, mag_thresh, dir_thresh, dir_kernel ):
    """
    Combine absolute x/y, magnitude and direction Sobel thresholds.

    :param unwarp_img: NOTE(review): not referenced in the body; kept only
        for signature compatibility with existing callers.
    :param gray: single-channel image all gradients are computed on
    :param mag_kernel: kernel size for the magnitude threshold
    :param mag_thresh: (low, high) bounds reused for gradx/grady/magnitude
    :param dir_thresh: (low, high) direction bounds in radians
    :param dir_kernel: kernel size for the direction threshold
    :return: binary image combining the four masks
    """
    gradx = abs_sobel_thresh(gray, orient='x', thresh_min=mag_thresh[0], thresh_max=mag_thresh[1])
    grady = abs_sobel_thresh(gray, orient='y', thresh_min=mag_thresh[0], thresh_max=mag_thresh[1])
    mag_binary = mag_threshold(gray, sobel_kernel=mag_kernel, mag_thresh=mag_thresh)
    dir_binary = dir_threshold(gray, sobel_kernel=dir_kernel, thresh=dir_thresh)
    # Keep a pixel when both absolute gradients fire, or when both the
    # magnitude and direction masks fire.
    combined = np.zeros_like(dir_binary)
    combined[((gradx == 1) & (grady == 1)) | ((mag_binary == 1) & (dir_binary == 1))] = 1
    return combined
def visualize_combine_imgages(unwarp_img, gray, mag_kernel, mag_thresh, dir_thresh, dir_kernel):
    """Show an image next to its combined Sobel magnitude+direction mask."""
    combined_mask = combine_thresholds(unwarp_img, gray, mag_kernel, mag_thresh, dir_thresh, dir_kernel)
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 16))
    ax1.imshow(unwarp_img)
    ax1.set_title('Unwarped Image', fontsize=18)
    ax2.imshow(combined_mask, cmap='gray')
    ax2.set_title('Sobel Magnitude + Direction', fontsize=18)
# Combined-threshold parameters reused across all five comparisons below
dir_thresh= (0.3 ,1.51)
dir_sobel_kernel = 15
mag_thresh = (50, 190)
mg_sobel_kernel = 15
# Run the combined magnitude+direction visualization over the first two
# warped images for each candidate single-channel input.
# sample_img = warped_images[1]
for sample_img in warped_images[:2]:
    gray = grayscale(sample_img)
    visualize_combine_imgages(sample_img, gray, mg_sobel_kernel,mag_thresh, dir_thresh, dir_sobel_kernel)
# sample_img = warped_images[1]
for sample_img in warped_images[:2]:
    gray_rgb,_,_ = extract_rgb_color_spaces(sample_img)
    visualize_combine_imgages(sample_img, gray_rgb, mg_sobel_kernel,mag_thresh, dir_thresh, dir_sobel_kernel)
# sample_img = warped_images[1]
for sample_img in warped_images[:2]:
    _,_,gray_hsv = extract_hsv_color_spaces(sample_img)
    visualize_combine_imgages(sample_img, gray_hsv, mg_sobel_kernel,mag_thresh, dir_thresh, dir_sobel_kernel)
for warped_img in warped_images[:2]:
    _,gray_hsl,_ = extract_hsl_color_spaces(warped_img)
    visualize_combine_imgages(warped_img, gray_hsl, mg_sobel_kernel,mag_thresh, dir_thresh, dir_sobel_kernel)
for sample_img in warped_images[:2]:
    gray_lab,_,_ = extract_lab_color_spaces(sample_img)
    visualize_combine_imgages(sample_img, gray_lab, mg_sobel_kernel,mag_thresh, dir_thresh, dir_sobel_kernel)
def hsl_threshold(img, thresh=(220, 255)):
    """
    Binary-threshold channel 1 of the HLS conversion of an RGB image.

    Uses an exclusive lower bound (>) and an inclusive upper bound (<=).

    :param img: RGB input image
    :param thresh: (low, high) bounds applied after normalising to 255
    :return: binary image, 1 where the normalised channel is in range
    """
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    channel = hls[:,:,1]
    # Stretch so the brightest pixel maps to 255 before thresholding
    channel = channel*(255/np.max(channel))
    mask = np.zeros_like(channel)
    mask[(channel > thresh[0]) & (channel <= thresh[1])] = 1
    return mask
# Preview the HLS channel threshold on one warped test image
sample_img = warped_images[1]
hsl_s = hsl_threshold(sample_img)
plt.imshow(hsl_s, cmap='gray')
plt.show()
def lab_threshold(unwarped_img, thresh=(190,255)):
    """
    Binary-threshold the B channel of the CIELAB conversion of an RGB
    image (picks out yellow lane markings).

    :param unwarped_img: RGB input image
    :param thresh: (low, high) bounds on the (possibly normalised) B plane
    :return: binary image, 1 inside the threshold range
    """
    lab_b = cv2.cvtColor(unwarped_img, cv2.COLOR_RGB2Lab)[:,:,2]
    # Only normalise when strong yellows are present; otherwise the
    # stretch would amplify noise into false positives.
    if np.max(lab_b) > 175:
        lab_b = lab_b*(255/np.max(lab_b))
    mask = np.zeros_like(lab_b)
    mask[((lab_b > thresh[0]) & (lab_b <= thresh[1]))] = 1
    return mask
# Preview the LAB B-channel threshold on the first warped test image
sample_img = warped_images[0]
lab = lab_threshold(sample_img)
plt.imshow(lab, cmap='gray')
plt.show()
def pipeline(p_img):
    """
    Full binary-lane-mask pipeline for one RGB frame.

    Undistorts, perspective-transforms, then ORs the HLS channel-1 and
    LAB B-channel thresholds into a single binary image.

    :param p_img: RGB input frame
    :return: (binary lane mask, inverse perspective matrix)
    """
    # Undistort the frame using the module-level calibration results
    undistort_img = undistort(p_img, mtx, dist)
    # Perspective transform to a bird's-eye view.
    # BUGFIX: corners_unwarp returns (warped, Minv, M); the original
    # unpacking (img_unwarp, M, Minv) swapped the two matrices, so the
    # "Minv" handed back to callers was actually the forward transform.
    img_unwarp, Minv, M = corners_unwarp(undistort_img, src, dst)
    # HLS L-channel Threshold (using default parameters)
    img_hsl_L = hsl_threshold(img_unwarp)
    # Lab B-channel Threshold (using default parameters)
    img_lab_B = lab_threshold(img_unwarp)
    # Union of the two colour masks
    combined = np.zeros_like(img_lab_B)
    combined[(img_hsl_L == 1) | (img_lab_B == 1)] = 1
    return combined, Minv
# Run the pipeline over every test image and plot frame/mask pairs
images = glob.glob('./test_images/*.jpg')
fig, axs = plt.subplots(len(images), 2, figsize=(16, 16))
axs = axs.ravel()
i = 0
for image in images:
    # OpenCV loads BGR; convert so matplotlib shows true colours
    img = cv2.cvtColor(cv2.imread(image), cv2.COLOR_BGR2RGB)
    img_bin, MinV = pipeline(img)
    # Left column: original frame; right column: binary lane mask
    for panel, panel_kwargs in ((img, {}), (img_bin, {'cmap': 'gray'})):
        axs[i].imshow(panel, **panel_kwargs)
        axs[i].axis('off')
        i += 1
def hist(img):
    """
    Column-wise sum over the bottom half of a binary image.

    Lane lines are nearly vertical closest to the car, so the per-column
    sum of the lower half peaks at the lane-line x positions.

    :param img: 2-D binary image
    :return: 1-D array of column sums
    """
    lower_half = img[img.shape[0] // 2:, :]
    return np.sum(lower_half, axis=0)
# Sanity-check the histogram on the first test frame's binary lane mask
img = cv2.imread(images[0])
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img_bin,Minv = pipeline(img)
histogram = hist(img_bin)
# print(histogram)
plt.plot(histogram)
plt.show()
# Define method to fit polynomial to binary image with lines extracted, using sliding window
def sliding_window_polyfit(img):
    """
    Fit second-order polynomials to the two lane lines of a binary image
    using a histogram starting point plus a sliding-window pixel search.

    :param img: 2-D binary warped image
    :return: (left_fit, right_fit, left_lane_inds, right_lane_inds,
              (rectangle_data, histogram)); a fit is None when no pixels
              were found for that side.
    """
    # Column histogram of the bottom half (inlined from hist() so this
    # function is self-contained)
    histogram = np.sum(img[img.shape[0] // 2:, :], axis=0)
    # Starting x for each line: the histogram peak in the quarter directly
    # left/right of the image centre (tighter than searching the whole
    # half, which could lock onto an adjacent lane).
    # BUGFIX: np.int was removed in NumPy 1.24 — use the builtin int.
    midpoint = int(histogram.shape[0] // 2)
    quarter_point = int(midpoint // 2)
    leftx_base = np.argmax(histogram[quarter_point:midpoint]) + quarter_point
    rightx_base = np.argmax(histogram[midpoint:(midpoint + quarter_point)]) + midpoint
    #print('base pts:', leftx_base, rightx_base)
    # Sliding-window parameters
    nwindows = 10                             # number of vertical slices
    window_height = int(img.shape[0] / nwindows)
    margin = 80                               # half-width of each window
    minpix = 40                               # min pixels to recentre a window
    # Coordinates of every nonzero pixel in the image
    nonzero = img.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Current window centres, updated as we walk up the image
    leftx_current = leftx_base
    rightx_current = rightx_base
    # Pixel indices assigned to each lane, plus window geometry for plots
    left_lane_inds = []
    right_lane_inds = []
    rectangle_data = []
    # Step through the windows one by one, bottom to top
    for window in range(nwindows):
        # Window bounds in y and x (around the current centres)
        win_y_low = img.shape[0] - (window + 1) * window_height
        win_y_high = img.shape[0] - window * window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin
        rectangle_data.append((win_y_low, win_y_high, win_xleft_low, win_xleft_high, win_xright_low, win_xright_high))
        # Nonzero pixels falling inside each window
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        # Recenter the next window on the mean x of the found pixels
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))
    # Concatenate the per-window index arrays
    left_lane_inds = np.concatenate(left_lane_inds)
    right_lane_inds = np.concatenate(right_lane_inds)
    # Extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    left_fit, right_fit = (None, None)
    # Fit a second order polynomial to each side that found pixels
    if len(leftx) != 0:
        left_fit = np.polyfit(lefty, leftx, 2)
    if len(rightx) != 0:
        right_fit = np.polyfit(righty, rightx, 2)
    visualization_data = (rectangle_data, histogram)
    return left_fit, right_fit, left_lane_inds, right_lane_inds, visualization_data
def split_windows(img):
    """
    Run the pipeline plus sliding-window fit on a frame and build a
    visualization image: green search windows, left lane pixels in red,
    right lane pixels in light blue.

    :param img: RGB input frame
    :return: (visualization image, left fit x values, right fit x values,
              plot y values)
    """
    img_bin,Minv = pipeline(img)
    left_fit, right_fit, left_lane_inds, right_lane_inds, visualization_data = sliding_window_polyfit(img_bin)
    h = img.shape[0]
    # Bottom-of-image x intercepts of the two fits.
    # NOTE(review): computed but never used below.
    left_fit_x_int = left_fit[0]*h**2 + left_fit[1]*h + left_fit[2]
    right_fit_x_int = right_fit[0]*h**2 + right_fit[1]*h + right_fit[2]
    #print('fit x-intercepts:', left_fit_x_int, right_fit_x_int)
    rectangles = visualization_data[0]
    histogram = visualization_data[1]
    # Create an output image to draw on and visualize the result
    # (stack the binary mask into 3 channels, scaled to 0-255)
    out_img = np.uint8(np.dstack((img_bin, img_bin, img_bin))*255)
    # Generate x and y values for plotting
    ploty = np.linspace(0, img_bin.shape[0]-1, img_bin.shape[0] )
    left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
    right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
    for rect in rectangles:
        # Draw the windows on the visualization image
        cv2.rectangle(out_img,(rect[2],rect[0]),(rect[3],rect[1]),(0,255,0), 2)
        cv2.rectangle(out_img,(rect[4],rect[0]),(rect[5],rect[1]),(0,255,0), 2)
    # Identify the x and y positions of all nonzero pixels in the image
    nonzero = img_bin.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Colour the lane pixels: left red, right light blue
    out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
    out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [100, 200, 255]
    return out_img,left_fitx,right_fitx,ploty
# visualize the sliding-window result on every test image
images = glob.glob('./test_images/*.jpg')
for image in images:
    img = cv2.imread(image)
    # OpenCV loads BGR; convert for matplotlib display
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    out_img,left_fitx,right_fitx,ploty = split_windows(img)
    plt.imshow(out_img)
    # Overlay the fitted polynomials in yellow
    plt.plot(left_fitx, ploty, color='yellow')
    plt.plot(right_fitx, ploty, color='yellow')
    plt.xlim(0, 1280)
    plt.ylim(720, 0)
    plt.show()
# Refit the lane polynomials using the previous frame's fits as a prior;
# assumes the lane position changes little between consecutive frames.
def polyfit_using_prev_fit(binary_warped, left_fit_prev, right_fit_prev):
    """
    Refit the lane polynomials by searching only within a margin of the
    previous frame's fitted curves (no sliding windows needed).

    :param binary_warped: 2-D binary warped image
    :param left_fit_prev: previous left-lane polynomial coefficients
    :param right_fit_prev: previous right-lane polynomial coefficients
    :return: (left_fit_new, right_fit_new, left_lane_inds, right_lane_inds);
             a fit is None when no pixels fell inside its search band
    """
    margin = 80
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Predicted x of each previous curve at every nonzero pixel's y
    left_pred = left_fit_prev[0]*(nonzeroy**2) + left_fit_prev[1]*nonzeroy + left_fit_prev[2]
    right_pred = right_fit_prev[0]*(nonzeroy**2) + right_fit_prev[1]*nonzeroy + right_fit_prev[2]
    # Keep pixels within +/- margin of the previous curves
    left_lane_inds = (nonzerox > left_pred - margin) & (nonzerox < left_pred + margin)
    right_lane_inds = (nonzerox > right_pred - margin) & (nonzerox < right_pred + margin)
    # Extract the selected pixel positions per side
    leftx, lefty = nonzerox[left_lane_inds], nonzeroy[left_lane_inds]
    rightx, righty = nonzerox[right_lane_inds], nonzeroy[right_lane_inds]
    left_fit_new, right_fit_new = (None, None)
    # Fit a second order polynomial to each side that has pixels
    if len(leftx) != 0:
        left_fit_new = np.polyfit(lefty, leftx, 2)
    if len(rightx) != 0:
        right_fit_new = np.polyfit(righty, rightx, 2)
    return left_fit_new, right_fit_new, left_lane_inds, right_lane_inds
# Search margin (pixels) used by the fill-polygon visualization below
margin = 80
def draw_polygon_and_fill(img_bin):
    """
    Visualize the margin-based re-search: run a sliding-window fit, feed
    it to polyfit_using_prev_fit, colour the selected lane pixels and
    shade the +/- margin search band around the first fit.

    :param img_bin: 2-D binary warped image
    :return: (blended visualization image, refit left x values,
              refit right x values, plot y values)
    """
    left_fit, right_fit, left_lane_inds, right_lane_inds, visualization_data = sliding_window_polyfit(img_bin)
    left_fit2, right_fit2, left_lane_inds2, right_lane_inds2 = polyfit_using_prev_fit(img_bin, left_fit, right_fit)
    # Generate x and y values for plotting
    ploty = np.linspace(0, img_bin.shape[0]-1, img_bin.shape[0] )
    left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
    right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
    left_fitx2 = left_fit2[0]*ploty**2 + left_fit2[1]*ploty + left_fit2[2]
    right_fitx2 = right_fit2[0]*ploty**2 + right_fit2[1]*ploty + right_fit2[2]
    # Create an image to draw on and an image to show the selection window
    out_img = np.uint8(np.dstack((img_bin, img_bin, img_bin))*255)
    window_img = np.zeros_like(out_img)
    # Color in left and right line pixels (left red, right blue)
    nonzero = img_bin.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    out_img[nonzeroy[left_lane_inds2], nonzerox[left_lane_inds2]] = [255, 0, 0]
    out_img[nonzeroy[right_lane_inds2], nonzerox[right_lane_inds2]] = [0, 0, 255]
    # Generate a polygon to illustrate the search window area (OLD FIT)
    # And recast the x and y points into usable format for cv2.fillPoly()
    left_line_window1 = np.array([np.transpose(np.vstack([left_fitx-margin, ploty]))])
    left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin, ploty])))])
    left_line_pts = np.hstack((left_line_window1, left_line_window2))
    right_line_window1 = np.array([np.transpose(np.vstack([right_fitx-margin, ploty]))])
    right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin, ploty])))])
    right_line_pts = np.hstack((right_line_window1, right_line_window2))
    # Draw the search bands onto the blank overlay and blend them in
    cv2.fillPoly(window_img, np.int_([left_line_pts]), (0,255, 0))
    cv2.fillPoly(window_img, np.int_([right_line_pts]), (0,255, 0))
    result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
    return result, left_fitx2, right_fitx2, ploty
# visualize the margin-search result on every test image
images = glob.glob('./test_images/*.jpg')
for image in images:
    img = cv2.imread(image)
    # OpenCV loads BGR; convert for matplotlib display
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img_bin, Minv = pipeline(img)
    result,left_fitx,right_fitx,ploty = draw_polygon_and_fill(img_bin)
    plt.imshow(result)
    # Overlay the refit polynomials in yellow
    plt.plot(left_fitx, ploty, color='yellow')
    plt.plot(right_fitx, ploty, color='yellow')
    plt.xlim(0, 1280)
    plt.ylim(720, 0)
    plt.show()
def u_test():
    """Visualize the sliding-window fit and search windows on test2.jpg."""
    # visualize the result on example image
    exampleImg = cv2.imread('./test_images/test2.jpg')
    exampleImg = cv2.cvtColor(exampleImg, cv2.COLOR_BGR2RGB)
    exampleImg_bin, Minv = pipeline(exampleImg)
    left_fit, right_fit, left_lane_inds, right_lane_inds, visualization_data = sliding_window_polyfit(exampleImg_bin)
    h = exampleImg.shape[0]
    # NOTE(review): the two intercepts below are computed but never used.
    left_fit_x_int = left_fit[0]*h**2 + left_fit[1]*h + left_fit[2]
    right_fit_x_int = right_fit[0]*h**2 + right_fit[1]*h + right_fit[2]
    #print('fit x-intercepts:', left_fit_x_int, right_fit_x_int)
    rectangles = visualization_data[0]
    histogram = visualization_data[1]
    # Create an output image to draw on and visualize the result
    out_img = np.uint8(np.dstack((exampleImg_bin, exampleImg_bin, exampleImg_bin))*255)
    # Generate x and y values for plotting
    ploty = np.linspace(0, exampleImg_bin.shape[0]-1, exampleImg_bin.shape[0] )
    left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
    right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
    for rect in rectangles:
        # Draw the windows on the visualization image
        cv2.rectangle(out_img,(rect[2],rect[0]),(rect[3],rect[1]),(0,255,0), 2)
        cv2.rectangle(out_img,(rect[4],rect[0]),(rect[5],rect[1]),(0,255,0), 2)
    # Identify the x and y positions of all nonzero pixels in the image
    nonzero = exampleImg_bin.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Colour the lane pixels: left red, right light blue
    out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
    out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [100, 200, 255]
    plt.imshow(out_img)
    plt.plot(left_fitx, ploty, color='yellow')
    plt.plot(right_fitx, ploty, color='yellow')
    plt.xlim(0, 1280)
    plt.ylim(720, 0)
u_test()
print('...')
# Bare Ellipsis below is a leftover no-op from the notebook cell
...
def u_test2():
    """
    Visualize polyfit_using_prev_fit on test5.jpg: lane pixels are
    re-searched within a margin of a previous fit and the search band is
    shaded over the binary image.
    """
    # visualize the result on example image
    exampleImg2 = cv2.imread('./test_images/test5.jpg')
    exampleImg2 = cv2.cvtColor(exampleImg2, cv2.COLOR_BGR2RGB)
    exampleImg2_bin, Minv = pipeline(exampleImg2)
    margin = 80
    # BUGFIX: the original referenced undefined names left_fit/right_fit
    # (NameError at runtime — see the pasted traceback that followed this
    # cell). Seed the "previous" fit with a fresh sliding-window fit on
    # this frame instead.
    left_fit, right_fit, _, _, _ = sliding_window_polyfit(exampleImg2_bin)
    left_fit2, right_fit2, left_lane_inds2, right_lane_inds2 = polyfit_using_prev_fit(exampleImg2_bin, left_fit, right_fit)
    # Generate x and y values for plotting
    ploty = np.linspace(0, exampleImg2_bin.shape[0]-1, exampleImg2_bin.shape[0] )
    left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
    right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
    left_fitx2 = left_fit2[0]*ploty**2 + left_fit2[1]*ploty + left_fit2[2]
    right_fitx2 = right_fit2[0]*ploty**2 + right_fit2[1]*ploty + right_fit2[2]
    # Create an image to draw on and an image to show the selection window
    out_img = np.uint8(np.dstack((exampleImg2_bin, exampleImg2_bin, exampleImg2_bin))*255)
    window_img = np.zeros_like(out_img)
    # Color in left and right line pixels
    nonzero = exampleImg2_bin.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    out_img[nonzeroy[left_lane_inds2], nonzerox[left_lane_inds2]] = [255, 0, 0]
    out_img[nonzeroy[right_lane_inds2], nonzerox[right_lane_inds2]] = [0, 0, 255]
    # Polygon illustrating the +/- margin search band around the seed fit,
    # recast into a format usable by cv2.fillPoly()
    left_line_window1 = np.array([np.transpose(np.vstack([left_fitx-margin, ploty]))])
    left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin, ploty])))])
    left_line_pts = np.hstack((left_line_window1, left_line_window2))
    right_line_window1 = np.array([np.transpose(np.vstack([right_fitx-margin, ploty]))])
    right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin, ploty])))])
    right_line_pts = np.hstack((right_line_window1, right_line_window2))
    # Draw the search band onto the blank overlay and blend it in
    cv2.fillPoly(window_img, np.int_([left_line_pts]), (0,255, 0))
    cv2.fillPoly(window_img, np.int_([right_line_pts]), (0,255, 0))
    result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
    plt.imshow(result)
    plt.plot(left_fitx2, ploty, color='yellow')
    plt.plot(right_fitx2, ploty, color='yellow')
    plt.xlim(0, 1280)
    plt.ylim(720, 0)
# Run the prev-fit visualization demo
u_test2()
print('...')
# NOTE(review): an IPython NameError traceback was pasted here — the call
# to u_test2() crashed with "NameError: name 'left_fit' is not defined"
# because the function referenced left_fit/right_fit without defining them.
# # Download IPython notebook as HTML file
import os
import subprocess
# subprocess.run with an argument list avoids shell interpretation —
# safer and more explicit than os.system with a raw command string.
subprocess.run(['jupyter', 'nbconvert', '--to', 'html', 'Advanced_Lane_Finding.ipynb'], check=False)